Clean up the debug tracing and crash diagnostics printed to the console: unify the x86-32/x86-64 stack-dump code, make the dump depth tunable via a new 'debug_stack_lines' boot parameter, dump the guest stack (read safely with get_user()) when a trap is taken in guest mode, print registers and panic rather than BUG() when domain 0 crashes, skip dumping an unused debug-trace buffer, and tidy the panic banner.
Also, rename get_cpu_user_regs -> guest_cpu_user_regs.
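For example (hypothetical boot entry, only the 'debug_stack_lines' option is introduced here), passing

    debug_stack_lines=40

on the Xen command line would print up to 40 rows of eight stack words per dump instead of the default 20.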
Signed-off-by: Keir Fraser <keir@xensource.com>
return;
}
-struct pt_regs *get_cpu_user_regs(void) { return ia64_task_regs(current); }
+struct pt_regs *guest_cpu_user_regs(void) { return ia64_task_regs(current); }
void raise_actimer_softirq(void)
{
if ( unlikely(!all_segs_okay) )
{
- struct cpu_user_regs *regs = get_cpu_user_regs();
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
unsigned long *rsp =
(n->arch.flags & TF_kernel_mode) ?
(unsigned long *)regs->rsp :
long do_switch_to_user(void)
{
- struct cpu_user_regs *regs = get_cpu_user_regs();
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
struct switch_to_user stu;
struct exec_domain *ed = current;
static void __context_switch(void)
{
- struct cpu_user_regs *stack_regs = get_cpu_user_regs();
+ struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
unsigned int cpu = smp_processor_id();
struct exec_domain *p = percpu_ctxt[cpu].curr_ed;
struct exec_domain *n = current;
}
else
{
- regs = get_cpu_user_regs();
+ regs = guest_cpu_user_regs();
#if defined(__i386__)
regs->eax = op;
regs->eip -= 2; /* re-execute 'int 0x82' */
return EXCRET_fault_fixed;
emulate:
- if ( x86_emulate_memop(get_cpu_user_regs(), addr,
+ if ( x86_emulate_memop(guest_cpu_user_regs(), addr,
&ptwr_mem_emulator, BITS_PER_LONG/8) )
return 0;
perfc_incrc(ptwr_emulations);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
+static int debug_stack_lines = 20;
+integer_param("debug_stack_lines", debug_stack_lines);
+
+static inline int kernel_text_address(unsigned long addr)
+{
+ if (addr >= (unsigned long) &_stext &&
+ addr <= (unsigned long) &_etext)
+ return 1;
+ return 0;
+}
+
+void show_guest_stack(void)
+{
+ int i;
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
+ unsigned long *stack = (unsigned long *)regs->esp, addr;
+
+ printk("Guest stack trace from "__OP"sp=%p:\n ", stack);
+
+ for ( i = 0; i < (debug_stack_lines*8); i++ )
+ {
+ if ( ((long)stack & (STACK_SIZE-1)) == 0 )
+ break;
+ if ( get_user(addr, stack) )
+ {
+ if ( i != 0 )
+ printk("\n ");
+ printk("Fault while accessing guest memory.");
+ i = 1;
+ break;
+ }
+ if ( (i != 0) && ((i % 8) == 0) )
+ printk("\n ");
+ printk("%p ", _p(addr));
+ stack++;
+ }
+ if ( i == 0 )
+ printk("Stack empty.");
+ printk("\n");
+}
+
+void show_trace(unsigned long *esp)
+{
+ unsigned long *stack = esp, addr;
+ int i = 0;
+
+ printk("Xen call trace from "__OP"sp=%p:\n ", stack);
+
+ while ( ((long) stack & (STACK_SIZE-1)) != 0 )
+ {
+ addr = *stack++;
+ if ( kernel_text_address(addr) )
+ {
+ if ( (i != 0) && ((i % 6) == 0) )
+ printk("\n ");
+ printk("[<%p>] ", _p(addr));
+ i++;
+ }
+ }
+ if ( i == 0 )
+ printk("Trace empty.");
+ printk("\n");
+}
+
+void show_stack(unsigned long *esp)
+{
+ unsigned long *stack = esp, addr;
+ int i;
+
+ printk("Xen stack trace from "__OP"sp=%p:\n ", stack);
+
+ for ( i = 0; i < (debug_stack_lines*8); i++ )
+ {
+ if ( ((long)stack & (STACK_SIZE-1)) == 0 )
+ break;
+ if ( (i != 0) && ((i % 8) == 0) )
+ printk("\n ");
+ addr = *stack++;
+ if ( kernel_text_address(addr) )
+ printk("[%p] ", _p(addr));
+ else
+ printk("%p ", _p(addr));
+ }
+ if ( i == 0 )
+ printk("Stack empty.");
+ printk("\n");
+
+ show_trace(esp);
+}
+
/*
* This is called for faults at very unexpected times (e.g., when interrupts
* are disabled). In such situations we can't do much that is safe. We try to
/* vmx_io_assist light-weight version, specific to PIT DM */
static void resume_pit_io(ioreq_t *p)
{
- struct cpu_user_regs *regs = get_cpu_user_regs();
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
unsigned long old_eax = regs->eax;
p->state = STATE_INVALID;
vcpu_iodata_t *vio;
ioreq_t *p;
struct domain *d = ed->domain;
- struct cpu_user_regs *regs = get_cpu_user_regs();
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
unsigned long old_eax;
int sign;
struct mi_per_cpu_info *mpci_p;
struct Xgt_desc_struct desc;
unsigned long pfn = 0;
struct pfn_info *page;
- struct cpu_user_regs *regs = get_cpu_user_regs();
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
vmx_stts();
set_bit(_VCPUF_guest_stts, &ed->vcpu_flags);
/* All CPUs have their own IDT to allow set_fast_trap(). */
idt_entry_t *idt_tables[NR_CPUS] = { 0 };
-static int kstack_depth_to_print = 8*20;
-
-static inline int kernel_text_address(unsigned long addr)
-{
- if (addr >= (unsigned long) &_stext &&
- addr <= (unsigned long) &_etext)
- return 1;
- return 0;
-
-}
-
-void show_guest_stack(void)
-{
- int i;
- struct cpu_user_regs *regs = get_cpu_user_regs();
- unsigned long *stack = (unsigned long *)regs->esp;
-
- printk("Guest EIP is %08x\n ", regs->eip);
-
- for ( i = 0; i < kstack_depth_to_print; i++ )
- {
- if ( ((long)stack & (STACK_SIZE-1)) == 0 )
- break;
- if ( i && ((i % 8) == 0) )
- printk("\n ");
- printk("%08lx ", *stack++);
- }
- printk("\n");
-
-}
-
-void show_trace(unsigned long *esp)
-{
- unsigned long *stack, addr;
- int i;
-
- printk("Call Trace from ESP=%p:\n ", esp);
- stack = esp;
- i = 0;
- while (((long) stack & (STACK_SIZE-1)) != 0) {
- addr = *stack++;
- if (kernel_text_address(addr)) {
- if (i && ((i % 6) == 0))
- printk("\n ");
- printk("[<%08lx>] ", addr);
- i++;
- }
- }
- printk("\n");
-}
-
-void show_stack(unsigned long *esp)
-{
- unsigned long *stack;
- int i;
-
- printk("Stack trace from ESP=%p:\n ", esp);
-
- stack = esp;
- for ( i = 0; i < kstack_depth_to_print; i++ )
- {
- if ( ((long)stack & (STACK_SIZE-1)) == 0 )
- break;
- if ( i && ((i % 8) == 0) )
- printk("\n ");
- if ( kernel_text_address(*stack) )
- printk("[%08lx] ", *stack++);
- else
- printk("%08lx ", *stack++);
- }
- printk("\n");
-
- show_trace( esp );
-}
-
void show_registers(struct cpu_user_regs *regs)
{
unsigned long ss, ds, es, fs, gs, cs;
#include <xen/sched.h>
#include <asm/msr.h>
-static int kstack_depth_to_print = 8*20;
-
-static inline int kernel_text_address(unsigned long addr)
-{
- if (addr >= (unsigned long) &_stext &&
- addr <= (unsigned long) &_etext)
- return 1;
- return 0;
-
-}
-
-void show_guest_stack(void)
-{
- int i;
- struct cpu_user_regs *regs = get_cpu_user_regs();
- unsigned long *stack = (unsigned long *)regs->rsp;
-
- printk("Guest RIP is %016lx\n ", regs->rip);
-
- for ( i = 0; i < kstack_depth_to_print; i++ )
- {
- if ( ((long)stack & (STACK_SIZE-1)) == 0 )
- break;
- if ( i && ((i % 8) == 0) )
- printk("\n ");
- printk("%016lx ", *stack++);
- }
- printk("\n");
-
-}
-
-void show_trace(unsigned long *rsp)
-{
- unsigned long *stack, addr;
- int i;
-
- printk("Call Trace from RSP=%p:\n ", rsp);
- stack = rsp;
- i = 0;
- while (((long) stack & (STACK_SIZE-1)) != 0) {
- addr = *stack++;
- if (kernel_text_address(addr)) {
- if (i && ((i % 6) == 0))
- printk("\n ");
- printk("[<%016lx>] ", addr);
- i++;
- }
- }
- printk("\n");
-}
-
-void show_stack(unsigned long *rsp)
-{
- unsigned long *stack;
- int i;
-
- printk("Stack trace from RSP=%p:\n ", rsp);
-
- stack = rsp;
- for ( i = 0; i < kstack_depth_to_print; i++ )
- {
- if ( ((long)stack & (STACK_SIZE-1)) == 0 )
- break;
- if ( i && ((i % 8) == 0) )
- printk("\n ");
- if ( kernel_text_address(*stack) )
- printk("[%016lx] ", *stack++);
- else
- printk("%016lx ", *stack++);
- }
- printk("\n");
-
- show_trace(rsp);
-}
-
void show_registers(struct cpu_user_regs *regs)
{
printk("CPU: %d\nEIP: %04lx:[<%016lx>] \nEFLAGS: %016lx\n",
regs->r12, regs->r13, regs->r14, regs->r15);
show_stack((unsigned long *)regs->rsp);
+ if ( GUEST_MODE(regs) )
+ show_guest_stack();
}
void show_page_walk(unsigned long addr)
struct domain *d = current->domain;
if ( d->domain_id == 0 )
- BUG();
+ {
+ show_registers(guest_cpu_user_regs());
+ panic("Domain 0 crashed!\n");
+ }
set_bit(_DOMF_crashed, &d->domain_flags);
switch ( op & SCHEDOP_cmdmask )
{
-
case SCHEDOP_yield:
{
ret = do_yield();
case SCHEDOP_shutdown:
{
- TRACE_3D(TRC_SCHED_SHUTDOWN, current->domain->domain_id, current->vcpu_id,
+ TRACE_3D(TRC_SCHED_SHUTDOWN,
+ current->domain->domain_id, current->vcpu_id,
(op >> SCHEDOP_reasonshift));
domain_shutdown((u8)(op >> SCHEDOP_reasonshift));
break;
static char *debugtrace_buf; /* Debug-trace buffer */
static unsigned int debugtrace_prd; /* Producer index */
static unsigned int debugtrace_kilobytes = 128, debugtrace_bytes;
+static unsigned int debugtrace_used;
static spinlock_t debugtrace_lock = SPIN_LOCK_UNLOCKED;
integer_param("debugtrace", debugtrace_kilobytes);
int _watchdog_on = watchdog_on;
unsigned long flags;
- if ( debugtrace_bytes == 0 )
+ if ( (debugtrace_bytes == 0) || !debugtrace_used )
return;
/* Watchdog can trigger if we print a really large buffer. */
if ( debugtrace_bytes == 0 )
return;
+ debugtrace_used = 1;
+
spin_lock_irqsave(&debugtrace_lock, flags);
ASSERT(debugtrace_buf[debugtrace_bytes - 1] == 0);
void panic(const char *fmt, ...)
{
va_list args;
- char buf[128];
+ char buf[128], cpustr[10];
unsigned long flags;
extern void machine_restart(char *);
/* Spit out multiline message in one go. */
spin_lock_irqsave(&console_lock, flags);
__putstr("\n****************************************\n");
+ __putstr("Panic on CPU");
+ sprintf(cpustr, "%d", smp_processor_id());
+ __putstr(cpustr);
+ __putstr(":\n");
__putstr(buf);
- __putstr("Aieee! CPU");
- sprintf(buf, "%d", smp_processor_id());
- __putstr(buf);
- __putstr(" is toast...\n");
__putstr("****************************************\n\n");
__putstr("Reboot in five seconds...\n");
spin_unlock_irqrestore(&console_lock, flags);
: : "r" (STACK_SIZE-4), "r" (ed) );
}
-static inline struct cpu_user_regs *get_cpu_user_regs(void)
+static inline struct cpu_user_regs *guest_cpu_user_regs(void)
{
struct cpu_user_regs *cpu_user_regs;
__asm__ ( "andl %%esp,%0; addl %2,%0"
#define reset_stack_and_jump(__fn) \
__asm__ __volatile__ ( \
"movl %0,%%esp; jmp "STR(__fn) \
- : : "r" (get_cpu_user_regs()) )
+ : : "r" (guest_cpu_user_regs()) )
#define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed)
: : "r" (STACK_SIZE-8), "r" (ed) );
}
-static inline struct cpu_user_regs *get_cpu_user_regs(void)
+static inline struct cpu_user_regs *guest_cpu_user_regs(void)
{
struct cpu_user_regs *cpu_user_regs;
__asm__( "andq %%rsp,%0; addq %2,%0"
#define reset_stack_and_jump(__fn) \
__asm__ __volatile__ ( \
"movq %0,%%rsp; jmp "STR(__fn) \
- : : "r" (get_cpu_user_regs()) )
+ : : "r" (guest_cpu_user_regs()) )
#define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed)